Testing the different components of the envelope

Motion Clouds: testing components of the envelope

In [1]:
import numpy as np
np.set_printoptions(precision=3, suppress=True)
import pylab
import matplotlib.pyplot as plt
%matplotlib inline
  Hide output
In [2]:
import os, sys
sys.path.append('..')
import MotionClouds as mc
fx, fy, ft = mc.get_grids(mc.N_X, mc.N_Y, mc.N_frame)
#mc.recompute = True
mc.notebook = True
  Hide output

Testing the color

In [3]:
%bookmark root
  Hide output
In [4]:
%cd..
  Hide output
/Users/lolo/Dropbox/science/MotionClouds

In [5]:
%run test_color.py
  Hide output
WARNING:traits.has_traits:DEPRECATED: traits.has_traits.wrapped_class, 'the 'implements' class advisor has been deprecated. Use the 'provides' class decorator.

Could not import Mayavi
color

color-alpha-0_0

color-alpha-0_5

color-alpha-1_0

color-alpha-1_5

color-alpha-2_0

color-ft_0-0_125

color-ft_0-0_25

color-ft_0-0_5

color-ft_0-1_0

color-ft_0-2_0

color-ft_0-4_0

color-ft_0-inf

[6350 %] elapsed[sec]: 0.958 | ETA[sec]: -0.943 
Saving sequence color-contrast-0_1.mp4
Title: 
                      Started: 06/18/2014 17:29:41
                      Finished: 06/18/2014 17:29:42
                      Total time elapsed: 0.000 sec

[  0 %] elapsed[sec]: 0.000

Saving sequence color-contrast-0_25.mp4
[6350 %] elapsed[sec]: 0.670 | ETA[sec]: -0.659 

Title: 
                      Started: 06/18/2014 17:29:44
                      Finished: 06/18/2014 17:29:44
                      Total time elapsed: 0.000 sec

[  0 %] elapsed[sec]: 0.000

Saving sequence color-contrast-0_5.mp4
[6350 %] elapsed[sec]: 0.602 | ETA[sec]: -0.592 

Title: 
                      Started: 06/18/2014 17:29:46
                      Finished: 06/18/2014 17:29:46
                      Total time elapsed: 0.000 sec

[  0 %] elapsed[sec]: 0.000

Saving sequence color-contrast-0_75.mp4
[6350 %] elapsed[sec]: 0.593 | ETA[sec]: -0.584 

Title: 
                      Started: 06/18/2014 17:29:48
                      Finished: 06/18/2014 17:29:48
                      Total time elapsed: 0.000 sec

[  0 %] elapsed[sec]: 0.000

Saving sequence color-contrast-1_0.mp4
[6350 %] elapsed[sec]: 0.584 | ETA[sec]: -0.575 

Title: 
                      Started: 06/18/2014 17:29:49
                      Finished: 06/18/2014 17:29:50
                      Total time elapsed: 0.000 sec

[  0 %] elapsed[sec]: 0.000

Saving sequence color-contrast-2_0.mp4
[6350 %] elapsed[sec]: 0.575 | ETA[sec]: -0.566 

Title: 
                      Started: 06/18/2014 17:29:51
                      Finished: 06/18/2014 17:29:52
                      Total time elapsed: 0.000 sec

[  0 %] elapsed[sec]: 0.000

Saving sequence color-energy_contrast-0_1.mp4
[6350 %] elapsed[sec]: 0.612 | ETA[sec]: -0.602 

Title: 
                      Started: 06/18/2014 17:29:53
                      Finished: 06/18/2014 17:29:54
                      Total time elapsed: 0.000 sec

[  0 %] elapsed[sec]: 0.000

Saving sequence color-energy_contrast-0_25.mp4
[6350 %] elapsed[sec]: 0.612 | ETA[sec]: -0.603 

Title: 
                      Started: 06/18/2014 17:29:55
                      Finished: 06/18/2014 17:29:56
                      Total time elapsed: 0.000 sec

[  0 %] elapsed[sec]: 0.000

Saving sequence color-energy_contrast-0_5.mp4
[6350 %] elapsed[sec]: 0.578 | ETA[sec]: -0.569 

Title: 
                      Started: 06/18/2014 17:29:57
                      Finished: 06/18/2014 17:29:57
                      Total time elapsed: 0.000 sec

[  0 %] elapsed[sec]: 0.000

Saving sequence color-energy_contrast-0_75.mp4
[6350 %] elapsed[sec]: 0.651 | ETA[sec]: -0.641 

Title: 
                      Started: 06/18/2014 17:29:59
                      Finished: 06/18/2014 17:29:59
                      Total time elapsed: 0.000 sec

[  0 %] elapsed[sec]: 0.000

Saving sequence color-energy_contrast-1_0.mp4
[6350 %] elapsed[sec]: 0.645 | ETA[sec]: -0.635 

Title: 
                      Started: 06/18/2014 17:30:01
                      Finished: 06/18/2014 17:30:01
                      Total time elapsed: 0.000 sec

[  0 %] elapsed[sec]: 0.000

Saving sequence color-energy_contrast-2_0.mp4
[6350 %] elapsed[sec]: 0.805 | ETA[sec]: -0.792 

Title: 
                      Started: 06/18/2014 17:30:03
                      Finished: 06/18/2014 17:30:03
                      Total time elapsed: 0.000 sec

[  0 %] elapsed[sec]: 0.000

Saving sequence color-seed-123456.mp4
[6350 %] elapsed[sec]: 0.579 | ETA[sec]: -0.570 

Title: 
                      Started: 06/18/2014 17:30:05
                      Finished: 06/18/2014 17:30:05
                      Total time elapsed: 0.000 sec

[  0 %] elapsed[sec]: 0.000

Saving sequence color-seed-123457.mp4
[6350 %] elapsed[sec]: 0.559 | ETA[sec]: -0.550 

Title: 
                      Started: 06/18/2014 17:30:06
                      Finished: 06/18/2014 17:30:07
                      Total time elapsed: 0.000 sec

[  0 %] elapsed[sec]: 0.000

Saving sequence color-seed-123458.mp4
[6350 %] elapsed[sec]: 0.595 | ETA[sec]: -0.585 

Title: 
                      Started: 06/18/2014 17:30:08
                      Finished: 06/18/2014 17:30:09
                      Total time elapsed: 0.000 sec

[  0 %] elapsed[sec]: 0.000

Saving sequence color-seed-123459.mp4
[6350 %] elapsed[sec]: 0.559 | ETA[sec]: -0.550 

Title: 
                      Started: 06/18/2014 17:30:10
                      Finished: 06/18/2014 17:30:11
                      Total time elapsed: 0.000 sec

[  0 %] elapsed[sec]: 0.000

Saving sequence color-seed-123460.mp4
[6350 %] elapsed[sec]: 0.576 | ETA[sec]: -0.573 

Title: 
                      Started: 06/18/2014 17:30:12
                      Finished: 06/18/2014 17:30:12
                      Total time elapsed: 0.000 sec

[  0 %] elapsed[sec]: 0.000

Saving sequence color-seed-123461.mp4
[6350 %] elapsed[sec]: 0.653 | ETA[sec]: -0.643 

Title: 
                      Started: 06/18/2014 17:30:14
                      Finished: 06/18/2014 17:30:14
                      Total time elapsed: 0.000 sec

[  0 %] elapsed[sec]: 0.000

Saving sequence color-seed-123462.mp4
[6350 %] elapsed[sec]: 0.729 | ETA[sec]: -0.718 

Title: 
                      Started: 06/18/2014 17:30:16
                      Finished: 06/18/2014 17:30:16
                      Total time elapsed: 0.000 sec
color-size-5

color-size-6

color-size_T-5

color-size_T-6

Testing the speed

In [6]:
%run test_speed.py
  Hide output
speed

speed-V_X--1_0

speed-V_X--0_5

speed-V_X-0_0

speed-V_X-0_1

speed-V_X-0_5

speed-V_X-1_0

speed-V_X-4_0

speed-V_Y--1_0

speed-V_Y--0_5

speed-V_Y-0_5

speed-V_Y-1_0

speed-V_Y-2_0

speed-B_V-0_001

speed-B_V-0_01

speed-B_V-0_05

speed-B_V-0_1

speed-B_V-0_5

speed-B_V-1_0

speed-B_V-10_0

Exploring the orientation component of the envelope around a grating.

In [7]:
%run test_grating.py
  Hide output
grating

grating-sparseness-0_9

grating-sparseness-1_34285714286

grating-sparseness-1_78571428571

grating-sparseness-2_22857142857

grating-sparseness-2_67142857143

grating-sparseness-3_11428571429

grating-sparseness-3_55714285714

grating-sparseness-4_0

grating-largeband-B_theta-pi-over-1

grating-largeband-B_theta-pi-over-2

grating-largeband-B_theta-pi-over-3

grating-largeband-B_theta-pi-over-5

grating-largeband-B_theta-pi-over-8

grating-largeband-B_theta-pi-over-13

grating-theta-pi-over-1

grating-theta-pi-over-2

grating-theta-pi-over-4

grating-theta-pi-over-3

grating-theta-pi-over-5

grating-theta-pi-over-8

grating-theta-pi-over-13

grating-theta-pi-over-20

grating-theta-pi-over-30

grating-B_theta-pi-over-1-V_X-1_0

grating-B_theta-pi-over-2-V_X-1_0

grating-B_theta-pi-over-3-V_X-1_0

grating-B_theta-pi-over-5-V_X-1_0

grating-B_theta-pi-over-8-V_X-1_0

grating-B_theta-pi-over-13-V_X-1_0

grating-B_sf-0_05

grating-B_sf-0_1

grating-B_sf-0_15

grating-B_sf-0_2

grating-B_sf-0_3

grating-B_sf-0_4

grating-B_sf-0_8

In [8]:
%run test_radial.py
  Hide output
radial

radial-B_sf-0_01

radial-B_sf-0_0334965439158

radial-B_sf-0_11220184543

radial-B_sf-0_375837404288

radial-B_sf-1_25892541179

radial-B_V-0_001

radial-B_V-0_01

radial-B_V-0_05

radial-B_V-0_1

radial-B_V-0_5

radial-B_V-1_0

radial-B_V-10_0

radial-sf_0-0_01

radial-sf_0-0_05

radial-sf_0-0_1

radial-sf_0-0_2

radial-sf_0-0_4

radial-sf_0-0_8

radial-sf_0_nologgabor-0_01

radial-sf_0_nologgabor-0_05

radial-sf_0_nologgabor-0_1

radial-sf_0_nologgabor-0_2

radial-sf_0_nologgabor-0_4

radial-sf_0_nologgabor-0_8

radial-seed-123456

radial-seed-123456

radial-seed-123457

radial-seed-None

radial-seed-None

radial-V_X-0_0

radial-V_X-0_5

radial-V_X-1_0

radial-V_X--1_0

In [9]:
%cd -b root
  Hide output
(bookmark:root) -> /Users/lolo/Dropbox/science/MotionClouds/posts
/Users/lolo/Dropbox/science/MotionClouds/posts

Testing competing Motion Clouds with psychopy

Analysis of a discrimination experiment

In the psychopy_competition.py script, we implemented an experiment to test whether one could discriminate the dominating motion in a sum of two motion clouds in opposite directions.

Herein, we analyse the data that was collected over different sessions and try to draw some conclusions.

In []:
import numpy as np
np.set_printoptions(precision=3, suppress=True)
import pylab
import matplotlib.pyplot as plt
%matplotlib inline
  Hide output

The experiment

In [1]:
#%pycat psychopy_competing.py
  Hide output

Analysis, version 1

In a first version of the experiment, we only stored the results in a numpy file:

In [2]:
!ls  data/competing_v1_*npy
  Hide output
data/competing_v1_bruno_Dec_14_1210.npy  data/competing_v1_lup_Dec_12_1003.npy	data/competing_v1_lup_Dec_12_1013.npy  data/competing_v1_lup_Dec_14_1201.npy  data/competing_v1_meduz_Dec_14_1204.npy

In [3]:
import glob
# Overview of all version-1 competition sessions: scatter the perceived
# direction (row 0 of each results file, coded -1/+1) against the contrast
# of the lower cloud (row 1), pooling every session on one axis.
fig = plt.figure()
ax = fig.add_subplot(111)
for fn in glob.glob('data/competing_v1_*npy'):
    results = np.load(fn)  # shape (2, n_trials): row 0 = response, row 1 = contrast
    print 'experiment ', fn, ', # trials=', results.shape[1]
    ax.scatter(results[1, :], results[0, :])
# NOTE: `fn` and `results` deliberately keep these names -- later cells
# inspect them (%whos) after this loop has run.
_ = ax.axis([0., 1., -1.1, 1.1])
_ = ax.set_xlabel('contrast')
_ = ax.set_ylabel('perceived direction')
  Hide output
experiment  data/competing_v1_bruno_Dec_14_1210.npy , # trials= 50
experiment  data/competing_v1_lup_Dec_12_1003.npy , # trials= 50
experiment  data/competing_v1_lup_Dec_12_1013.npy , # trials= 50
experiment  data/competing_v1_lup_Dec_14_1201.npy , # trials= 10
experiment  data/competing_v1_meduz_Dec_14_1204.npy , # trials= 50

In [4]:
# Recode each trial as correct/incorrect relative to which cloud was
# stronger, and visualize both readings ("detected lower" vs "detected
# upper") side by side.  `data_` rows: 0 = contrast of the *stronger* cloud
# (mapped into [.5, 1]), 1 = 1 if the response was correct, else 0.
alpha = .3
fig = plt.figure(figsize=(12,5))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
data = []
for fn in glob.glob('data/competing_v1_*npy'):
    results = np.load(fn)
    data_ = np.empty(results.shape)
    # lower stronger, detected lower = CORRECT is 1
    ax1.scatter(results[1, results[1, :]<.5], 
                1.*(results[0, results[1, :]<.5]==-1), 
                c='r', alpha=alpha)
    # copying data: contrast (from .5 to 1), correct 
    # (when the lower cloud dominates, its contrast is 1 - results[1])
    data_[0, results[1, :]<.5] = 1. - results[1, results[1, :]<.5]
    data_[1, results[1, :]<.5] = 1.*(results[0, results[1, :]<.5]==-1)
    # upper stronger, detected lower = INCORRECT is 1
    ax1.scatter(results[1, results[1, :]>.5], 
                1.*(results[0, results[1, :]>.5]==-1), 
                c='b', alpha=alpha)
    # lower stronger, detected upper = INCORRECT is 1
    ax2.scatter(results[1, results[1, :]<.5], 
                1.*(results[0, results[1, :]<.5]==1), 
                c='b', alpha=alpha, marker='x')
    # upper stronger, detected upper = CORRECT is 1
    ax2.scatter(results[1, results[1, :]>.5], 
                1.*(results[0, results[1, :]>.5]==1), 
                c='r', alpha=alpha, marker='x')
    # copying data: contrast (from .5 to 1), correct 
    # NOTE: trials with results[1, :] == .5 exactly fall in this branch only.
    data_[0, results[1, :]>=.5] = results[1, results[1, :]>=.5]
    data_[1, results[1, :]>=.5] = 1.*(results[0, results[1, :]>=.5]==1)
    data.append(data_)

# Shared cosmetics for both panels (the two subplots are mirror readings
# of the same trials -- see the note below the figure).
for ax in [ax1, ax2]:
    ax.axis([0., 1., -.1, 1.1])
    _ = ax.set_xlabel('contrast')
_ = ax1.set_ylabel('detected lower')
_ = ax1.legend(['lower', 'upper'], loc='right')
_ = ax2.set_ylabel('detected upper')
_ = ax2.legend(['lower', 'upper'], loc='right')
  Hide output

(note the subplots are equivalent up to a flip)

One could not fit psychometric curves for this 2AFC. See for instance https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3110332/

In [30]:
# Bin trials by contrast and plot the fraction of correct responses per bin,
# one translucent bar series per session.
n_hist = 10
C_bins = np.linspace(0.5, 1., n_hist)
fig = plt.figure(figsize=(12,5))
ax = fig.add_subplot(111)
for data_ in data:
    # Raw counts per contrast bin: correct trials and all trials.
    # (The deprecated `normed=False` keyword was dropped: raw counts are the
    # default, and `normed` was removed from np.histogram in numpy 1.24.)
    hist_correct, bins = np.histogram(data_[0, data_[1, : ]==1], bins=C_bins)
    hist_total, bins = np.histogram(data_[0, : ], bins=C_bins)
    # Fraction correct per bin, bars centred on the bin midpoints.
    plt.bar(bins[:-1] + (bins[1]-bins[0])/2, (1.*hist_correct)/(1.*hist_total), width=.95*(bins[1]-bins[0]), alpha=.1)
_ = ax.axis([0.5, 1., 0., 1.1])
_ = ax.set_xlabel('contrast')
  Hide output

let's explore another way:

In [31]:
import pypsignifit as psi
  Hide output

data : an array or a list of lists containing stimulus intensities in the first column, number of correct responses (nAFC) or number of YES- responses in the second column, and number of trials in the third column. Each row should correspond to one experimental block. In addition, the sequence of the rows is taken as the sequence of data acquisition. Alternatively, the relative frequencies of correct responses resp YES responses can be given.

In [32]:
stimulus_intensities, percent_correct = np.array([]), np.array([])
for data_ in data:
    #print data_.shape
    #stimulus_intensities = np.hstack((stimulus_intensities, data_[0, : ]))
    #percent_correct = np.hstack((percent_correct, data_[1, : ]))
    #print data_[0, : ].min(), data_[0, : ].max(), data_[1, : ].mean()
    hist_correct, bins = np.histogram(data_[0, data_[1, : ]==1], bins=C_bins, normed=False)
    hist_total, bins = np.histogram(data_[0, : ], bins=C_bins, normed=False)
    stimulus_intensities = np.hstack((stimulus_intensities, bins[:-1] + (bins[1]-bins[0])/2))
    percent_correct = np.hstack((percent_correct, (1.*hist_correct)/(1.*hist_total)))

num_observations = len(stimulus_intensities)
print 'num_observations: ', num_observations
psidata = np.vstack((stimulus_intensities, percent_correct, np.array([2]*num_observations)))
  Hide output
num_observations:  45

BootstrapInference

In [34]:
# see http://psignifit.sourceforge.net/DESIGN.html
nafc = 2
# Constraints/priors for the three fitted parameters, in order:
# threshold and slope unconstrained; lapse rate given a Beta(2,20) prior
# (presumably to keep the lapse rate small -- see psignifit DESIGN docs).
constraints = ( 'unconstrained', 'unconstrained', 'Beta(2,20)')
B = psi.BootstrapInference ( psidata, priors=constraints, nafc=nafc )
# (threshold, slope, lapse rate)
print B.estimate
  Hide output
[ 0.49862663  0.03601899  0.04546066]

How well do these parameters describe the data? The deviance is a measure that describes the goodness of fit for a model, based on the sum of the squares error metric:

In [35]:
print B.deviance, B.getThres()#, B.getCI(1)
  Hide output
0.186105675197 0.498626633193

In [36]:
_ = psi.psigniplot.plotPMF(B, xlabel_text='contrast', showaxes=True)
  Hide output
In [37]:
B.sample()
  Hide output
In [38]:
psi.GoodnessOfFit(B)
  Hide output
---------------------------------------------------------------------------
IndexError                                Traceback (most recent call last)
<ipython-input-38-730472e77ec6> in <module>()
----> 1 psi.GoodnessOfFit(B)

/usr/local/lib/python2.7/site-packages/pypsignifit/psigniplot.pyc in GoodnessOfFit(InferenceObject, warn)
    575     if infer in ["BayesInference","ASIRInference"]:
    576         InferenceObject.drawposteriorexamples ( ax=ax_plot )
--> 577     plotThres ( InferenceObject, ax=ax_plot )
    578     plotPMF   ( InferenceObject, ax=ax_plot, showdesc=True )
    579     if infer in ["BayesInference","ASIRInference"]:

/usr/local/lib/python2.7/site-packages/pypsignifit/psigniplot.pyc in plotThres(InferenceObject, ax, color)
    480 
    481     for k,cut in enumerate(InferenceObject.cuts):
--> 482         c25,c975 = InferenceObject.getCI ( cut=k, conf=(.025,.975) )
    483         thres = InferenceObject.getThres ( cut )
    484         ylev = InferenceObject.evaluate ( [thres] )

/usr/local/lib/python2.7/site-packages/pypsignifit/psignidata.pyc in getCI(self, cut, conf, thres_or_slope)
    452                 vals.append(stats.norm.cdf( bias + ( stats.norm.ppf(pp) + bias ) / (1-acc*(stats.norm.ppf(pp) + bias )) ))
    453 
--> 454             return p.prctile ( self.__bthres[:,cut], 100*N.array(vals) )
    455         elif thres_or_slope[0] == "s":
    456             bias = self.__sl_bias[cut]

/usr/local/lib/python2.7/site-packages/matplotlib/mlab.pyc in prctile(x, p)
    953         frac[cond] += 1
    954 
--> 955     return _interpolate(values[ai],values[bi],frac)
    956 
    957 def prctile_rank(x, p):

IndexError: index -9223372036854775808 is out of bounds for size 2000
  • Top left: displays the given data points and the fitted psychometric function. Thresholds and confidence intervals are plotted at three levels (default: 25%, 50% and 75%). Mind that the y-axis starts at 0.5 (the guess rate in a 2AFC task); the 50% threshold is therefore located at 0.75 on the y-axis. D signifies the deviance value.
  • Bottom left: histogram of bootstrapped deviance values (default = 2000 samples). The 95% confidence limits are indicated by red dotted lines and the actually observed deviance is indicated by the solid red line. If the observed deviance is outside the 95% confidence limits that indicates a bad fit and you will receive a warning message.
  • Top middle: deviance residuals are plotted as a function of the predicted correct response rate of the model (x-axis corresponds to y-axis in panel 1). This plot helps you to detect systematic deviations between the model and the data. The dotted line is the best linear fit that relates deviance residuals to the predicted correct response rate. Rpd gives the numerical value of that correlation. Note that the residuals are scaled to account for differences in the variability of a binomially distributed random variable (e.g. maximum variance at p=0.5).
  • Bottom middle: histogram of bootstrapped correlation coefficients for the correlation between residuals and performance level (same logic applies as in panel 2). Dotted lines denote 95% intervals of the sampled correlation coefficients, the solid line marks the observed correlation between model prediction and deviance residuals.
  • Top right: deviance residuals are plotted as a function of block index i.e. the sequence in which the data were acquired (WARNING: this graph can be properly interpreted only when stimulus intensities were fixed in separate blocks). If the observer was learning, the fitted linear correlation between residuals and block index should be positive.
  • Bottom right: histogram of bootstrapped correlation coefficients for the correlation between deviance residuals and block index (same logic applies as in panel 2 and 4).
In [13]:
priors = ( 'Gauss(0,5)', 'Gamma(1,3)', 'Beta(2,20)' )
  Hide output
In [14]:
mcmc = psi.BayesInference ( psidata, priors=priors, nafc=nafc )
  Hide output
Acceptance: 0.943333333333
Acceptance:
/usr/local/lib/python2.7/site-packages/pypsignifit/pygibbsit.py:63: RuntimeWarning: divide by zero encountered in log
  G2 = 2 * sum ( T*log(T/fitted) )
/usr/local/lib/python2.7/site-packages/pypsignifit/pygibbsit.py:63: RuntimeWarning: invalid value encountered in multiply
  G2 = 2 * sum ( T*log(T/fitted) )
/usr/local/lib/python2.7/site-packages/pypsignifit/pygibbsit.py:63: RuntimeWarning: invalid value encountered in divide
  G2 = 2 * sum ( T*log(T/fitted) )
/usr/local/lib/python2.7/site-packages/pypsignifit/pygibbsit.py:60: RuntimeWarning: invalid value encountered in double_scalars
  fitted[i,j,k] = T[i,j,:].sum() * T[:,j,k].sum() / T[:,j,:].sum()
/usr/local/lib/python2.7/site-packages/pypsignifit/pygibbsit.py:94: RuntimeWarning: divide by zero encountered in log
  G2 = 2 * sum ( T*log(T/fitted) )

 0.541666666667
Acceptance: 0.525
Acceptance: 0.506015960712

/usr/local/lib/python2.7/site-packages/pypsignifit/pygibbsit.py:94: RuntimeWarning: invalid value encountered in multiply
  G2 = 2 * sum ( T*log(T/fitted) )

In [15]:
mcmc.estimate
  Hide output
Out[15]:
array([-1.63854678,  2.99021992,  0.0866207 ])
In [16]:
psi.ConvergenceMCMC ( mcmc )
  Hide output
/usr/local/lib/python2.7/site-packages/pypsignifit/psignidata.py:1330: RuntimeWarning: invalid value encountered in double_scalars
  B = n * sum( (psi_i-psi_mean)**2 ) / (m-1)

In [17]:
psi.GoodnessOfFit ( mcmc )
  Hide output
/usr/local/lib/python2.7/site-packages/matplotlib/lines.py:503: RuntimeWarning: invalid value encountered in greater_equal
  return np.alltrue(x[1:] - x[0:-1] >= 0)
/usr/local/lib/python2.7/site-packages/pypsignifit/psigniplot.py:267: RuntimeWarning: invalid value encountered in greater_equal
  pval = N.mean( (simdata-observed)>=0 )

In [18]:
 psi.ParameterPlot ( mcmc )
  Hide output
Out[18]:
[<matplotlib.axes.Axes at 0x10cd42610>,
 <matplotlib.axes.Axes at 0x10ce1aa90>,
 <matplotlib.axes.Axes at 0x10cef8810>]
In [19]:
psi.plotInfluential(mcmc)
  Hide output
In [19]:
 
  Hide output
In []:
 
  Hide output

Testing basic functions of Motion Clouds

Motion Clouds: raw principles

Motion Clouds are synthesized textures which aim at having similar characteristics as natural images but with controlled parameters. There are many ways to achieve these results and this notebook aims at showing that different procedures from different communities (neuroscience, modelling, computer vision, ...) may produce similar results.

In [1]:
import numpy as np
np.set_printoptions(precision=3, suppress=True)
import pylab
import matplotlib.pyplot as plt
%matplotlib inline
  Hide output

Using Fourier ("official Motion Clouds")

In [2]:
import sys
sys.path.append('..')
import MotionClouds as mc
fx, fy, ft = mc.get_grids(mc.N_X, mc.N_Y, mc.N_frame)
  Hide output

Using mixtures of images

In [3]:
# NOTE(review): scipy.misc.lena() was removed in SciPy 1.0; on modern
# installs substitute e.g. scipy.datasets.ascent() -- confirm before re-running.
import scipy.misc
lena = scipy.misc.lena() * 1.
# Normalize the test image to zero mean and unit variance.
lena -= lena.mean()
lena /= lena.std()
print lena.shape
  Hide output
(512, 512)

In [4]:
plt.imshow(lena, cmap=plt.cm.gray)
  Hide output
Out[4]:
<matplotlib.image.AxesImage at 0x11a19e810>
In [5]:
def noise(image=None):
    """Generate a surrogate noise image from ``image``.

    Circularly shifts (rolls) the image by a random offset along each of
    its first two axes, which preserves the pixel histogram while
    scrambling the global layout.

    Parameters
    ----------
    image : ndarray, optional
        Source image; defaults to the module-level ``lena`` image.
        A ``None`` sentinel is used (instead of ``image=lena``) so the
        default is looked up at call time rather than frozen when the
        function is defined.

    Returns
    -------
    ndarray
        A new array of the same shape; the input is not modified
        (``np.roll`` returns a copy).
    """
    if image is None:
        image = lena
    for axis in [0, 1]:
        image = np.roll(image, np.random.randint(image.shape[axis]), axis=axis)
    return image
  Hide output
In [6]:
plt.imshow(noise(), cmap=plt.cm.gray)
  Hide output
Out[6]:
<matplotlib.image.AxesImage at 0x11bd08090>
In [7]:
plt.imshow(noise(), cmap=plt.cm.gray)
  Hide output
Out[7]:
<matplotlib.image.AxesImage at 0x11c24b190>

Using ARMA processes

Now, we define the ARMA process as an averaging process with a certain time constant $\tau=30.$ (in frames).

In [8]:
def ARMA(image, tau=30.):
    """One step of a first-order autoregressive (leaky-average) process.

    Blends the current frame with a fresh ``noise()`` draw using the time
    constant ``tau`` (in frames): a fraction ``1 - 1/tau`` of ``image`` is
    kept and a fraction ``1/tau`` of new noise is mixed in.
    """
    leak = 1 / tau
    return (1 - leak) * image + leak * noise()
  Hide output

initializing

In [9]:
image = ARMA(lena)
plt.imshow(image, cmap=plt.cm.gray)
  Hide output
Out[9]:
<matplotlib.image.AxesImage at 0x11c7808d0>
In [10]:
for _ in range(1000): image = ARMA(image)
plt.imshow(image, cmap=plt.cm.gray)
  Hide output
Out[10]:
<matplotlib.image.AxesImage at 0x11ccb9ed0>
In [11]:
for _ in range(1000): image = ARMA(image)
plt.imshow(image, cmap=plt.cm.gray)
  Hide output
Out[11]:
<matplotlib.image.AxesImage at 0x11d1fb3d0>
In [12]:
for _ in range(1000): image = ARMA(image)
plt.imshow(image, cmap=plt.cm.gray)
  Hide output
Out[12]:
<matplotlib.image.AxesImage at 0x11d931890>
In [16]:
%cd..
# Iterate the ARMA process frame by frame to build a long movie.
N_frame = 1024
# NOTE(review): with the 512x512 image shown above, this array is
# 512*512*1024 doubles (~2 GB) -- check available memory before running.
z = np.zeros((lena.shape[0], lena.shape[1], N_frame))
z[:, :, 0] = image
for i_frame in range(1, N_frame): 
    z[:, :, i_frame] = ARMA(z[:, :, i_frame-1])
# Affinely rescale the zero-mean frames towards [0, 1] before saving.
mc.anim_save(.5 + .5*z, filename='results/arma')
  Hide output
[51150 %] elapsed[sec]: 146.872 | ETA[sec]: -146.585 
Saving sequence ../results/arma.mp4
Title: 
                      Started: 06/18/2014 17:36:44
                      Finished: 06/18/2014 17:39:11
                      Total time elapsed: 0.000 sec

In [18]:
mc.notebook = True
mc.in_show_video(name='arma')
  Hide output
/Users/lolo/Dropbox/science/MotionClouds
arma

Testing discrimination between Motion Clouds with psychopy

Analysis of a discrimination experiment

In the psychopy_discrimination.py script, we implemented an experiment to test whether the overall shape of motion clouds could influence discrimination of speed.

Herein, we analyse the data that was collected over different sessions and try to draw some conclusions.

In [1]:
import numpy as np
np.set_printoptions(precision=3, suppress=True)
import pylab
import matplotlib.pyplot as plt
%matplotlib inline
  Hide output

The experiment

In [2]:
#%pycat psychopy_discrimination.py
  Hide output

Analysis - version 1

In a first version of the experiment, we only stored the results in a numpy file.

In [11]:
!ls  ../data/discriminating_*
  Hide output
../data/discriminating_john_Jan_23_1515.npy  ../data/discriminating_lup_Jan_24_0914.npy  ../data/discriminating_lup_Jan_24_0937.npy
../data/discriminating_lup_Jan_23_1401.npy   ../data/discriminating_lup_Jan_24_0919.npy  ../data/discriminating_v2_lup_Feb_07_1409.pickle
../data/discriminating_lup_Jan_23_1735.npy   ../data/discriminating_lup_Jan_24_0931.npy  ../data/discriminating_v2_lup_Feb_07_1434.pickle

In [12]:
!ls  ../data/
  Hide output
competing_v1_bruno_Dec_14_1210.npy  competing_v1_meduz_Dec_14_1204.npy	 discriminating_lup_Jan_24_0914.npy  discriminating_v2_lup_Feb_07_1409.pickle
competing_v1_lup_Dec_12_1003.npy    discriminating_john_Jan_23_1515.npy  discriminating_lup_Jan_24_0919.npy  discriminating_v2_lup_Feb_07_1434.pickle
competing_v1_lup_Dec_12_1013.npy    discriminating_lup_Jan_23_1401.npy	 discriminating_lup_Jan_24_0931.npy
competing_v1_lup_Dec_14_1201.npy    discriminating_lup_Jan_23_1735.npy	 discriminating_lup_Jan_24_0937.npy

In [4]:
import glob
# Quick-look summary of every version-1 discrimination session file.
for fn in glob.glob('../data/discriminating_*npy'):
    results = np.load(fn)
    print fn, results.shape
    print('analyzing results')
    # Summing absolute values counts the weight of answered trials per row;
    # assumes responses are coded around +-1 (some files show fractional
    # totals) -- TODO confirm against psychopy_discrimination.py.
    print '# of trials :', np.abs(results).sum(axis=1)
    # Map the mean response from [-1, 1] to a proportion in [0, 1].
    print 'average results: ', (results.sum(axis=1)/np.abs(results).sum(axis=1)*.5+.5)
  Hide output
../data/discriminating_john_Jan_23_1515.npy (2, 50)
analyzing results
# of trials : [ 50.     24.508]
average results:  [ 0.48  1.  ]
../data/discriminating_lup_Jan_23_1401.npy (2, 50)
analyzing results
# of trials : [ 50.     28.126]
average results:  [ 0.66  1.  ]
../data/discriminating_lup_Jan_23_1735.npy (3, 50)
analyzing results
# of trials : [  9.  14.  13.]
average results:  [ 1.  1.  1.]
../data/discriminating_lup_Jan_24_0914.npy (3, 50)
analyzing results
# of trials : [ 17.  21.  12.]
average results:  [ 0.647  0.857  1.   ]
../data/discriminating_lup_Jan_24_0919.npy (3, 50)
analyzing results
# of trials : [ 10.  25.  15.]
average results:  [ 0.7    0.32   0.533]
../data/discriminating_lup_Jan_24_0931.npy (7, 50)
analyzing results
# of trials : [  3.   4.   8.   8.   7.  12.   8.]
average results:  [ 0.667  1.     0.625  0.375  1.     0.167  0.125]
../data/discriminating_lup_Jan_24_0937.npy (7, 50)
analyzing results
# of trials : [  7.   5.   6.  12.  10.   2.   8.]
average results:  [ 0.857  0.8    0.833  0.417  0.2    1.     0.375]

In [5]:
%whos
  Hide output
Variable   Type       Data/Info
-------------------------------
fn         str        ../data/discriminating_lup_Jan_24_0937.npy
glob       module     <module 'glob' from '/usr<...>/lib/python2.7/glob.pyc'>
np         module     <module 'numpy' from '/us<...>ages/numpy/__init__.pyc'>
plt        module     <module 'matplotlib.pyplo<...>s/matplotlib/pyplot.pyc'>
pylab      module     <module 'pylab' from '/us<...>site-packages/pylab.pyc'>
results    ndarray    7x50: 350 elems, type `float64`, 2800 bytes

Another solution is to use:

In [6]:
#data= 1
#np.savez('toto', data=data, results=results)
#print np.load('toto.npz')['data']
  Hide output

or directly psychopy.misc:

In [7]:
from psychopy import misc
  Hide output
In [9]:
#info = misc.fromFile('../data/discriminating.pickle')
  Hide output
In [10]:
# NOTE(review): this cell needs `info` (and `results`) from the
# `misc.fromFile(...)` call in the previous cell, which is commented out;
# run as-is it raises the NameError shown below.
alphas = [-1., -.5, 0., 0.5, 1., 1.5, 2.]
fileName = '../data/discriminating_' + info['observer'] + '.pickle'
info['alphas'] = alphas
info['results'] = results
#numpy.savez(fileName, results=results, alphas=alphas)
misc.toFile(fileName, info)
  Hide output
---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-10-1bd6d05d02af> in <module>()
      1 alphas = [-1., -.5, 0., 0.5, 1., 1.5, 2.]
----> 2 fileName = '../data/discriminating_' + info['observer'] + '.pickle'
      3 info['alphas'] = alphas
      4 info['results'] = results
      5 #numpy.savez(fileName, results=results, alphas=alphas)

NameError: name 'info' is not defined

Analysis - version 2

In the second version, we stored more data:

In [14]:
import glob
from psychopy import misc
fig = plt.figure()
ax = fig.add_subplot(111)
for fn in glob.glob('../data/discriminating_v2_*pickle'):
    data = misc.fromFile(fn)
    print fn, results.shape
    print('analyzing results')
    alphas = np.array(data['alphas'])
    print ' alphas = ', alphas
    print '# of trials :', np.abs(data['results']).sum(axis=1)
    print 'average results: ', (data['results'].sum(axis=1)/np.abs(data['results']).sum(axis=1)*.5+.5)
    width = (alphas.max()-alphas.min())/len(alphas)
    ax.bar(data['alphas'] - width/2, (data['results'].sum(axis=1)/np.abs(data['results']).sum(axis=1)*.5+.5), width=width, alpha=.3 )
#    for i_trial in xrange(data['results'].shape[1]):
#        ax.scatter(data['alphas'][data['results']
ax.set_xlabel('alpha')
ax.set_ylabel('% correct')
  Hide output
 ../data/discriminating_v2_lup_Feb_07_1409.pickle (7, 50)
analyzing results
 alphas =  [-1.  -0.5  0.   0.5  1.   1.5  2. ]
# of trials : [ 10.   4.   5.  11.   6.  11.   3.]
average results:  [ 0.7    1.     0.4    0.545  0.833  0.273  1.   ]
../data/discriminating_v2_lup_Feb_07_1434.pickle (7, 50)
analyzing results
 alphas =  [-1.  -0.5  0.   0.5  1.   1.5  2. ]
# of trials : [  4.   3.  10.  12.   8.   8.   5.]
average results:  [ 0.75   0.667  0.8    0.5    0.125  0.25   0.4  ]

Out[14]:
<matplotlib.text.Text at 0x1068affd0>
In []:
# TODO : correct for the number of trials / make global reading / make a new draw of alphas
# TODO : test f_0
  Hide output
In []:
  Hide output
    Share